{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- ASSERT(vmcb);
-
- vmcb->cr0 |= X86_CR0_TS;
+ /* FPU state already dirty? Then no need to setup_fpu() lazily. */
+ if ( test_bit(_VCPUF_fpu_dirtied, &v->vcpu_flags) )
+ return;
- if (!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS))
+ /*
+ * If the guest does not have TS enabled then we must cause and handle an
+ * exception on first use of the FPU. If the guest *does* have TS enabled
+ * then this is not necessary: no FPU activity can occur until the guest
+ * clears CR0.TS, and we will initialise the FPU when that happens.
+ */
+ if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
+ {
v->arch.hvm_svm.vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
+ vmcb->cr0 |= X86_CR0_TS;
+ }
}
static void arch_svm_do_launch(struct vcpu *v)
{
struct vcpu *v = current;
- clts();
-
setup_fpu(v);
+ vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
- if (!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS))
+ if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
vmcb->cr0 &= ~X86_CR0_TS;
-
- vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
}
vmcb->cr0 = value | X86_CR0_PG;
v->arch.hvm_svm.cpu_shadow_cr0 = value;
- /* Check if FP Unit Trap need to be on */
- if (value & X86_CR0_TS)
- {
- vmcb->exception_intercepts |= EXCEPTION_BITMAP_NM;
+ /* TS cleared? Then initialise FPU now. */
+ if ( !(value & X86_CR0_TS) )
+ {
+ setup_fpu(v);
+ vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
}
HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx\n", value);
break;
case INSTR_CLTS:
- clts();
+ /* TS being cleared means that it's time to restore FPU state. */
setup_fpu(current);
+ vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
- vmcb->exception_intercepts &= ~EXCEPTION_BITMAP_NM;
break;
case INSTR_LMSW:
struct vcpu *v = current;
struct hvm_virpit *vpit = &v->domain->arch.hvm_domain.vpit;
s_time_t next_pit = -1, next_wakeup;
- unsigned int inst_len;
- svm_stts(v);
- inst_len = __get_instruction_length(vmcb, INSTR_HLT, NULL);
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(vmcb, 1);
if ( !v->vcpu_id )
next_pit = get_pit_scheduled(v, vpit);
static inline void svm_vmexit_do_mwait(void)
{
- return;
}
break;
case VMEXIT_INTR:
- svm_stts(v);
raise_softirq(SCHEDULE_SOFTIRQ);
break;
{
struct domain *d = v->domain;
struct hvm_virpit *vpit = &d->arch.hvm_domain.vpit;
-
+
+ svm_stts(v);
+
if ( test_bit(iopacket_port(d), &d->shared_info->evtchn_pending[0]) ||
test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags) )
hvm_wait_io();
if ( vpit->first_injected )
pickup_deactive_ticks(vpit);
svm_set_tsc_shift(v, vpit);
-
+
/* We can't resume the guest if we're waiting on I/O */
ASSERT(!test_bit(ARCH_HVM_IO_WAIT, &v->arch.hvm_vcpu.ioflags));
}
unsigned long cr0;
struct vcpu *v = current;
- clts();
setup_fpu(current);
+ __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
/* Disable TS in guest CR0 unless the guest wants the exception too. */
__vmread_vcpu(v, CR0_READ_SHADOW, &cr0);
cr0 &= ~X86_CR0_TS;
__vmwrite(GUEST_CR0, cr0);
}
-
- /* Xen itself doesn't need another exception. */
- __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}
/* Reserved bits: [31:15], [12:11], [9], [6], [2:1] */
__vmread_vcpu(v, CR0_READ_SHADOW, &old_cr0);
paging_enabled = (old_cr0 & X86_CR0_PE) && (old_cr0 & X86_CR0_PG);
- /*
- * Disable TS? Then we do so at the same time, and initialise FPU.
- * This avoids needing another vmexit.
- */
- if ( (old_cr0 & ~value & X86_CR0_TS) != 0 )
+ /* TS cleared? Then initialise FPU now. */
+ if ( !(value & X86_CR0_TS) )
{
- clts();
setup_fpu(v);
+ __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
}
__vmwrite(GUEST_CR0, value | X86_CR0_PE | X86_CR0_PG | X86_CR0_NE);
TRACE_VMEXIT(1,TYPE_CLTS);
/* We initialise the FPU now, to avoid needing another vmexit. */
- clts();
- setup_fpu(current);
+ setup_fpu(v);
+ __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_NM);
__vmread_vcpu(v, GUEST_CR0, &value);
value &= ~X86_CR0_TS; /* clear TS */